From 27a9d6d6143f17e2cc2f6426b8a1e2843f7fe705 Mon Sep 17 00:00:00 2001 From: "kaf24@firebug.cl.cam.ac.uk" Date: Thu, 20 Oct 2005 11:25:55 +0100 Subject: [PATCH] Fix NMI race with context switch. The machine used to auto reboot if an NMI was received in a critical time window when context switching domains. There is a small time window when the GDT may become unmapped (after CR3 is updated and before setting GDTR with the new GDT) during a domain context switch. If an NMI is received during this time window a triple fault is triggered causing the machine to auto reboot. Bug found and original patch proposed by Jose Renato Santos. Signed-off-by: Keir Fraser --- xen/arch/x86/domain.c | 21 ++++++++++++++++----- xen/arch/x86/setup.c | 16 ++++++++++++---- 2 files changed, 28 insertions(+), 9 deletions(-) diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index 25302d0f5f..7d61eadfab 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -226,11 +226,9 @@ struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id) if ( (v->vcpu_id = vcpu_id) != 0 ) { - v->arch.schedule_tail = d->vcpu[0]->arch.schedule_tail; + v->arch.schedule_tail = d->vcpu[0]->arch.schedule_tail; v->arch.perdomain_ptes = d->arch.mm_perdomain_pt + (vcpu_id << PDPT_VCPU_SHIFT); - v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] = - l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR); } return v; @@ -256,6 +254,7 @@ void free_perdomain_pt(struct domain *d) void arch_do_createdomain(struct vcpu *v) { struct domain *d = v->domain; + int vcpuid; if ( is_idle_task(d) ) return; @@ -275,8 +274,20 @@ void arch_do_createdomain(struct vcpu *v) set_pfn_from_mfn(virt_to_phys(d->arch.mm_perdomain_pt) >> PAGE_SHIFT, INVALID_M2P_ENTRY); v->arch.perdomain_ptes = d->arch.mm_perdomain_pt; - v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] = - l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR); + + /* + * Map Xen segments into every VCPU's GDT, irrespective of whether every + * VCPU will 
actually be used. This avoids an NMI race during context + * switch: if we take an interrupt after switching CR3 but before switching + * GDT, and the old VCPU# is invalid in the new domain, we would otherwise + * try to load CS from an invalid table. + */ + for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ ) + { + d->arch.mm_perdomain_pt[ + (vcpuid << PDPT_VCPU_SHIFT) + FIRST_RESERVED_GDT_PAGE] = + l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR); + } v->arch.guest_vtable = __linear_l2_table; v->arch.shadow_vtable = __shadow_linear_l2_table; diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c index c6653708a1..388735fb49 100644 --- a/xen/arch/x86/setup.c +++ b/xen/arch/x86/setup.c @@ -141,6 +141,7 @@ static void __init do_initcalls(void) static void __init start_of_day(void) { int i; + unsigned long vgdt; early_cpu_init(); @@ -158,10 +159,17 @@ static void __init start_of_day(void) arch_do_createdomain(current); - /* Map default GDT into their final position in the idle page table. */ - map_pages_to_xen( - GDT_VIRT_START(current) + FIRST_RESERVED_GDT_BYTE, - virt_to_phys(gdt_table) >> PAGE_SHIFT, 1, PAGE_HYPERVISOR); + /* + * Map default GDT into its final positions in the idle page table. As + * noted in arch_do_createdomain(), we must map for every possible VCPU#. + */ + vgdt = GDT_VIRT_START(current) + FIRST_RESERVED_GDT_BYTE; + for ( i = 0; i < MAX_VIRT_CPUS; i++ ) + { + map_pages_to_xen( + vgdt, virt_to_phys(gdt_table) >> PAGE_SHIFT, 1, PAGE_HYPERVISOR); + vgdt += 1 << PDPT_VCPU_VA_SHIFT; + } find_smp_config(); -- 2.30.2